From 16ca18af9004f25512a33bf60e131bfe2a6dac1a Mon Sep 17 00:00:00 2001
From: "kaf24@firebug.cl.cam.ac.uk" <kaf24@firebug.cl.cam.ac.uk>
Date: Tue, 22 Mar 2005 19:26:37 +0000
Subject: [PATCH] bitkeeper revision 1.1159.272.8
 (4240716dixo5jLBihZPvbRrP21dn4g)

Schedule page scrubbing for dead domains off the per-cpu periodic
ticker. We take 10% of busy cpus and all of idle cpu time.

Signed-off-by: Keir Fraser
---
 xen/arch/x86/domain.c     |  3 ++
 xen/common/page_alloc.c   | 89 +++++++++++++++++++++++++++++++++------
 xen/common/schedule.c     |  2 +
 xen/include/xen/mm.h      | 13 ++++++
 xen/include/xen/softirq.h |  3 +-
 5 files changed, 97 insertions(+), 13 deletions(-)

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 9c81c9c718..812d14c506 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -69,7 +69,10 @@ static __attribute_used__ void idle_loop(void)
     {
         irq_stat[cpu].idle_timestamp = jiffies;
         while ( !softirq_pending(cpu) )
+        {
+            page_scrub_schedule_work();
             default_idle();
+        }
         do_softirq();
     }
 }
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index d8ac2d4ca0..8637b8cea5 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -28,6 +28,7 @@
 #include
 #include
 #include
+#include
 #include

 /*
@@ -551,7 +552,6 @@ void free_domheap_pages(struct pfn_info *pg, unsigned int order)
 {
     int i, drop_dom_ref;
     struct domain *d = pg->u.inuse.domain;
-    void *p;

     ASSERT(!in_irq());

@@ -579,26 +579,31 @@ void free_domheap_pages(struct pfn_info *pg, unsigned int order)
             pg[i].tlbflush_timestamp = tlbflush_current_time();
             pg[i].u.free.cpu_mask = 1 << d->processor;
             list_del(&pg[i].list);
+        }
+
+        d->tot_pages -= 1 << order;
+        drop_dom_ref = (d->tot_pages == 0);
+
+        spin_unlock_recursive(&d->page_alloc_lock);
+
+        if ( likely(!test_bit(DF_DYING, &d->flags)) )
+        {
+            free_heap_pages(MEMZONE_DOM, pg, order);
+        }
+        else
+        {
             /*
              * Normally we expect a domain to clear pages before freeing them,
              * if it cares about the secrecy of their contents. However, after
              * a domain has died we assume responsibility for erasure.
              */
-            if ( unlikely(test_bit(DF_DYING, &d->flags)) )
+            for ( i = 0; i < (1 << order); i++ )
             {
-                p = map_domain_mem(page_to_phys(&pg[i]));
-                clear_page(p);
-                unmap_domain_mem(p);
+                spin_lock(&page_scrub_lock);
+                list_add(&pg[i].list, &page_scrub_list);
+                spin_unlock(&page_scrub_lock);
             }
         }
-
-        d->tot_pages -= 1 << order;
-        drop_dom_ref = (d->tot_pages == 0);
-
-        spin_unlock_recursive(&d->page_alloc_lock);
-
-        free_heap_pages(MEMZONE_DOM, pg, order);
     }
     else
     {
@@ -616,3 +621,63 @@ unsigned long avail_domheap_pages(void)
 {
     return avail[MEMZONE_DOM];
 }
+
+
+
+/*************************
+ * PAGE SCRUBBING
+ */
+
+static spinlock_t page_scrub_lock;
+struct list_head page_scrub_list;
+
+static void page_scrub_softirq(void)
+{
+    struct list_head *ent;
+    struct pfn_info *pg;
+    void *p;
+    int i;
+    s_time_t start = NOW();
+
+    /* Aim to do 1ms of work (ten percent of a 10ms jiffy). */
+    do {
+        spin_lock(&page_scrub_lock);
+
+        if ( unlikely((ent = page_scrub_list.next) == &page_scrub_list) )
+        {
+            spin_unlock(&page_scrub_lock);
+            return;
+        }
+
+        /* Peel up to 16 pages from the list. */
+        for ( i = 0; i < 16; i++ )
+            if ( (ent = ent->next) == &page_scrub_list )
+                break;
+
+        /* Remove peeled pages from the list. */
+        ent->next->prev = &page_scrub_list;
+        page_scrub_list.next = ent->next;
+
+        spin_unlock(&page_scrub_lock);
+
+        /* Working backwards, scrub each page in turn. */
+        while ( ent != &page_scrub_list )
+        {
+            pg = list_entry(ent, struct pfn_info, list);
+            ent = ent->prev;
+            p = map_domain_mem(page_to_phys(pg));
+            clear_page(p);
+            unmap_domain_mem(p);
+            free_heap_pages(MEMZONE_DOM, pg, 0);
+        }
+    } while ( (NOW() - start) < MILLISECS(1) );
+}
+
+static __init int page_scrub_init(void)
+{
+    spin_lock_init(&page_scrub_lock);
+    INIT_LIST_HEAD(&page_scrub_list);
+    open_softirq(PAGE_SCRUB_SOFTIRQ, page_scrub_softirq);
+    return 0;
+}
+__initcall(page_scrub_init);
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index 62f764e9d9..d16c2192f4 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -437,6 +437,8 @@ static void t_timer_fn(unsigned long unused)
     if ( !is_idle_task(d) && update_dom_time(d) )
         send_guest_virq(d, VIRQ_TIMER);

+    page_scrub_schedule_work();
+
     t_timer[d->processor].expires = NOW() + MILLISECS(10);
     add_ac_timer(&t_timer[d->processor]);
 }
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index 12242ca491..33ce8ac603 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -2,6 +2,10 @@
 #ifndef __XEN_MM_H__
 #define __XEN_MM_H__

+#include
+#include
+#include
+
 struct domain;
 struct pfn_info;

@@ -34,6 +38,15 @@ unsigned long avail_domheap_pages(void);
 #define alloc_domheap_page(_d) (alloc_domheap_pages(_d,0))
 #define free_domheap_page(_p) (free_domheap_pages(_p,0))

+/* Automatic page scrubbing for dead domains. */
+extern spinlock_t page_scrub_lock;
+extern struct list_head page_scrub_list;
+#define page_scrub_schedule_work() \
+    do { \
+        if ( !list_empty(&page_scrub_list) ) \
+            raise_softirq(PAGE_SCRUB_SOFTIRQ); \
+    } while ( 0 )
+
 #include

 #endif /* __XEN_MM_H__ */
diff --git a/xen/include/xen/softirq.h b/xen/include/xen/softirq.h
index 6180dae775..a538540247 100644
--- a/xen/include/xen/softirq.h
+++ b/xen/include/xen/softirq.h
@@ -7,7 +7,8 @@
 #define NEW_TLBFLUSH_CLOCK_PERIOD_SOFTIRQ 2
 #define KEYPRESS_SOFTIRQ 3
 #define NMI_SOFTIRQ 4
-#define NR_SOFTIRQS 5
+#define PAGE_SCRUB_SOFTIRQ 5
+#define NR_SOFTIRQS 6

 #ifndef __ASSEMBLY__
-- 
2.30.2